Also use a macro to get EPT walk length.
Signed-off-by: Xin Li <xin.li@intel.com>
MSR_IA32_VMX_PROCBASED_CTLS2, &mismatch);
}
+ /* The IA32_VMX_EPT_VPID_CAP MSR exists only when EPT or VPID is available */
+ if ( _vmx_secondary_exec_control & (SECONDARY_EXEC_ENABLE_EPT |
+ SECONDARY_EXEC_ENABLE_VPID) )
+ {
+ rdmsrl(MSR_IA32_VMX_EPT_VPID_CAP, _vmx_ept_vpid_cap);
+
+ /*
+ * Additional sanity checks before enabling EPT:
+ * 1) the CPU we are running on must support EPT WB, as we will set the
+ * EPT paging-structure memory type to WB;
+ * 2) the CPU must support an EPT page-walk length of 4, per Intel SDM
+ * section 25.2.2 ("EPT Translation Mechanism").
+ *
+ * Otherwise EPT is not used.
+ */
+ if ( !(_vmx_ept_vpid_cap & VMX_EPT_MEMORY_TYPE_WB) ||
+ !(_vmx_ept_vpid_cap & VMX_EPT_WALK_LENGTH_4_SUPPORTED) )
+ _vmx_secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
+ }
+
if ( _vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT )
{
/*
SECONDARY_EXEC_UNRESTRICTED_GUEST);
}
- /* The IA32_VMX_EPT_VPID_CAP MSR exists only when EPT or VPID available */
- if ( _vmx_secondary_exec_control &
- (SECONDARY_EXEC_ENABLE_EPT | SECONDARY_EXEC_ENABLE_VPID) )
- rdmsrl(MSR_IA32_VMX_EPT_VPID_CAP, _vmx_ept_vpid_cap);
-
if ( (_vmx_secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING) &&
ple_gap == 0 )
{
{
int rc;
- d->arch.hvm_domain.vmx.ept_control.etmt = EPT_DEFAULT_MT;
- d->arch.hvm_domain.vmx.ept_control.gaw = EPT_DEFAULT_GAW;
+ /* Set the memory type used when accessing EPT paging structures. */
+ d->arch.hvm_domain.vmx.ept_control.ept_mt = EPT_DEFAULT_MT;
+
+ /* Set the EPT page-walk length; the field encodes (walk length - 1), i.e. 3. */
+ d->arch.hvm_domain.vmx.ept_control.ept_wl = 3;
+
d->arch.hvm_domain.vmx.ept_control.asr =
pagetable_get_pfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
-
if ( (rc = vmx_alloc_vlapic_mapping(d)) != 0 )
return rc;
ASSERT(table != NULL);
- for ( i = EPT_DEFAULT_GAW; i > walk_level; i-- )
+ for ( i = ept_get_wl(d); i > walk_level; i-- )
{
ret = ept_next_level(d, 0, &table, &gfn_remainder, i * EPT_TABLE_ORDER);
if ( !ret )
/* Should check if gfn obeys GAW here. */
- for ( i = EPT_DEFAULT_GAW; i > 0; i-- )
+ for ( i = ept_get_wl(d); i > 0; i-- )
{
retry:
ret = ept_next_level(d, 1, &table, &gfn_remainder,
if ( gfn > d->arch.p2m->max_mapped_pfn )
goto out;
- for ( i = EPT_DEFAULT_GAW; i > 0; i-- )
+ for ( i = ept_get_wl(d); i > 0; i-- )
{
ret = ept_next_level(d, 1, &table, &gfn_remainder,
i * EPT_TABLE_ORDER);
goto out;
}
- for ( i = EPT_DEFAULT_GAW; i >= 0; i-- )
+ for ( i = ept_get_wl(d); i >= 0; i-- )
{
ept_entry_t *ept_entry, *next;
u32 index;
if ( pagetable_get_pfn(p2m_get_pagetable(p2m_get_hostp2m(d))) == 0 )
return;
- BUG_ON(EPT_DEFAULT_GAW != 3);
-
l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)))));
for (i4 = 0; i4 < EPT_PAGETABLE_ENTRIES; i4++ )
{
int order;
int i;
int is_pod;
- int ret;
+ int ret = 0;
unsigned long index;
unsigned long gfn, gfn_remainder;
unsigned long record_counter = 0;
table =
map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
- for ( i = EPT_DEFAULT_GAW; i > 0; i-- )
+ for ( i = ept_get_wl(d); i > 0; i-- )
{
ret = ept_next_level(d, 1, &table, &gfn_remainder,
i * EPT_TABLE_ORDER);
unsigned long msrs[VMX_MSR_COUNT];
};
-#define EPT_DEFAULT_MT 6
-#define EPT_DEFAULT_GAW 3
+#define EPT_DEFAULT_MT MTRR_TYPE_WRBACK
struct vmx_domain {
unsigned long apic_access_mfn;
union {
struct {
- u64 etmt :3,
- gaw :3,
- rsvd :6,
- asr :52;
+ u64 ept_mt :3,
+ ept_wl :3,
+ rsvd :6,
+ asr :52;
};
u64 eptp;
} ept_control;
cpumask_t ept_synced;
};
+#define ept_get_wl(d) \
+ ((d)->arch.hvm_domain.vmx.ept_control.ept_wl)
+
struct arch_vmx_struct {
/* Virtual address of VMCS. */
struct vmcs_struct *vmcs;
extern bool_t cpu_has_vmx_ins_outs_instr_info;
-extern u64 vmx_ept_vpid_cap;
-
+#define VMX_EPT_WALK_LENGTH_4_SUPPORTED 0x00000040
+#define VMX_EPT_MEMORY_TYPE_UC 0x00000100
+#define VMX_EPT_MEMORY_TYPE_WB 0x00004000
#define VMX_EPT_SUPERPAGE_2MB 0x00010000
#define VMX_EPT_SUPERPAGE_1GB 0x00020000
(vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
#define cpu_has_vmx_ept \
(vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT)
-#define cpu_has_vmx_ept_1gb \
- (vmx_ept_vpid_cap & VMX_EPT_SUPERPAGE_1GB)
-#define cpu_has_vmx_ept_2mb \
- (vmx_ept_vpid_cap & VMX_EPT_SUPERPAGE_2MB)
#define cpu_has_vmx_vpid \
(vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
#define cpu_has_monitor_trap_flag \
#define MODRM_EAX_07 ".byte 0x38\n" /* [EAX], with reg/opcode: /7 */
#define MODRM_EAX_ECX ".byte 0xc1\n" /* EAX, ECX */
+extern u64 vmx_ept_vpid_cap;
+
+#define cpu_has_vmx_ept_wl4_supported \
+ (vmx_ept_vpid_cap & VMX_EPT_WALK_LENGTH_4_SUPPORTED)
+#define cpu_has_vmx_ept_mt_uc \
+ (vmx_ept_vpid_cap & VMX_EPT_MEMORY_TYPE_UC)
+#define cpu_has_vmx_ept_mt_wb \
+ (vmx_ept_vpid_cap & VMX_EPT_MEMORY_TYPE_WB)
+#define cpu_has_vmx_ept_1gb \
+ (vmx_ept_vpid_cap & VMX_EPT_SUPERPAGE_1GB)
+#define cpu_has_vmx_ept_2mb \
+ (vmx_ept_vpid_cap & VMX_EPT_SUPERPAGE_2MB)
+
+
static inline void __vmptrld(u64 addr)
{
asm volatile ( VMPTRLD_OPCODE